Construct Single-Hierarchical P/NBD Model for Short Timeframe Synthetic Data

Author

Mick Cooney

Published

June 9, 2023

In this workbook we construct the single-hierarchical P/NBD models on the synthetic data with the shorter timeframe.

1 Load and Construct Datasets

We start by modelling the P/NBD model using our synthetic datasets before we try to model real-life data.

Show code
# Calibration ("fit") window and the validation window that follows it.
# The validation period begins exactly where the fit period ends.
use_fit_start_date   <- as.Date("2020-01-01")
use_fit_end_date     <- as.Date("2022-01-01")
use_valid_start_date <- as.Date("2022-01-01")
use_valid_end_date   <- as.Date("2023-01-01")

1.1 Load Short Time-frame Synthetic Data

We now want to load the short time-frame synthetic data.

Show code
# Per-customer cohort assignments for the short time-frame synthetic data.
customer_cohortdata_tbl <- read_rds("data/synthdata_shortframe_cohort_tbl.rds")

glimpse(customer_cohortdata_tbl)
Rows: 50,000
Columns: 4
$ customer_id    <chr> "SFC202001_0001", "SFC202001_0002", "SFC202001_0003", "…
$ cohort_qtr     <chr> "2020 Q1", "2020 Q1", "2020 Q1", "2020 Q1", "2020 Q1", …
$ cohort_ym      <chr> "2020 01", "2020 01", "2020 01", "2020 01", "2020 01", …
$ first_tnx_date <date> 2020-01-01, 2020-01-01, 2020-01-01, 2020-01-01, 2020-0…
Show code
# The true per-customer simulation parameters (lambda, mu, tau, amounts)
# used to generate the synthetic data — useful for validating inference.
customer_simparams_tbl <- read_rds("data/synthdata_shortframe_simparams_tbl.rds")

glimpse(customer_simparams_tbl)
Rows: 50,000
Columns: 9
$ customer_id     <chr> "SFC202001_0001", "SFC202001_0002", "SFC202001_0003", …
$ cohort_qtr      <chr> "2020 Q1", "2020 Q1", "2020 Q1", "2020 Q1", "2020 Q1",…
$ cohort_ym       <chr> "2020 01", "2020 01", "2020 01", "2020 01", "2020 01",…
$ first_tnx_date  <date> 2020-01-01, 2020-01-01, 2020-01-01, 2020-01-01, 2020-…
$ customer_lambda <dbl> 0.12213805, 0.29987747, 0.31504009, 0.03856001, 0.1881…
$ customer_mu     <dbl> 0.127118566, 0.096184402, 0.052334526, 0.204708842, 0.…
$ customer_tau    <dbl> 29.5956480, 10.9437199, 1.6938450, 2.6798108, 48.75206…
$ customer_amtmn  <dbl> 46.2371662, 111.0425353, 45.8870891, 43.0754249, 10.93…
$ customer_amtcv  <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
Show code
# The simulated transaction records themselves.
customer_transactions_tbl <- read_rds("data/synthdata_shortframe_transactions_tbl.rds")

glimpse(customer_transactions_tbl)
Rows: 297,815
Columns: 4
$ customer_id   <chr> "SFC202001_0028", "SFC202001_0006", "SFC202001_0012", "S…
$ tnx_timestamp <dttm> 2020-01-01 00:59:15, 2020-01-01 01:25:43, 2020-01-01 04…
$ invoice_id    <chr> "T20200101-0001", "T20200101-0002", "T20200101-0003", "T…
$ tnx_amount    <dbl> 269.07, 124.62, 20.81, 14.76, 16.02, 19.16, 211.91, 43.9…

1.2 Load Derived Data

Show code
# Pre-computed customer-id subsets of different sizes.
id_1000  <- read_rds("data/shortsynth_id_1000.rds")
id_5000  <- read_rds("data/shortsynth_id_5000.rds")
id_10000 <- read_rds("data/shortsynth_id_10000.rds")

# Pre-computed fit datasets; we model the 1,000-customer subset below.
fit_1000_data_tbl  <- read_rds("data/shortsynth_fit_1000_data_tbl.rds")
fit_10000_data_tbl <- read_rds("data/shortsynth_fit_10000_data_tbl.rds")

customer_fit_stats_tbl    <- fit_1000_data_tbl
customer_summarystats_tbl <- read_rds("data/shortsynth_customer_summarystats_tbl.rds")

# Observed transaction-count summaries for fit and validation periods.
obs_fitdata_tbl   <- read_rds("data/shortsynth_obs_fitdata_tbl.rds")
obs_validdata_tbl <- read_rds("data/shortsynth_obs_validdata_tbl.rds")

Finally, we need to set our directories where we save our Stan code and the model outputs.

Show code
# Output directory for compiled models / sampler CSVs, and the Stan source dir.
stan_modeldir <- "stan_models"
stan_codedir  <- "stan_code"

2 Fit First Hierarchical Lambda Model

Our first hierarchical model puts a hierarchical prior around the mean of our population \(\lambda\) - lambda_mn.

Once again we use a Gamma prior for it.

2.1 Compile and Fit Stan Model

We now compile this model using CmdStanR.

Show code
# Compile the single-hierarchical-lambda P/NBD model with CmdStanR,
# with pedantic compile-time checks of the Stan code enabled.
pnbd_onehierlambda_stanmodel <- cmdstan_model(
  "stan_code/pnbd_onehier_lambda.stan",
  include_paths = stan_codedir,
  pedantic      = TRUE,
  dir           = stan_modeldir
)

We then use this compiled model with our data to produce a fit of the data.

Show code
# Identifiers and cache file for this fit; bump the seed so each fit in the
# workbook uses a distinct (but reproducible) seed.
stan_modelname <- "pnbd_init_short_onehierlambda1"
stanfit_prefix <- str_c("fit_", stan_modelname)
stanfit_seed   <- stanfit_seed + 1

stanfit_object_file <- glue("data/{stanfit_prefix}_stanfit.rds")

# Stan input: per-customer recency/frequency summaries plus the fixed prior
# hyperparameters — a Gamma prior on the population mean of lambda.
stan_data_lst <- customer_fit_stats_tbl |>
  select(customer_id, x, t_x, T_cal) |>
  compose_data(
    hier_lambda_mn_p1 = 0.25,
    hier_lambda_mn_p2 = 1,
    lambda_cv         = 1.00,
    mu_mn             = 0.10,
    mu_cv             = 1.00
  )

# Reload a cached fit if one exists; otherwise sample and cache the result.
if (file_exists(stanfit_object_file)) {
  pnbd_init_short_onehierlambda1_stanfit <- read_rds(stanfit_object_file)
} else {
  pnbd_init_short_onehierlambda1_stanfit <- pnbd_onehierlambda_stanmodel$sample(
    data            = stan_data_lst,
    chains          = 4,
    iter_warmup     = 500,
    iter_sampling   = 500,
    seed            = stanfit_seed,
    save_warmup     = TRUE,
    output_dir      = stan_modeldir,
    output_basename = stanfit_prefix
  )

  pnbd_init_short_onehierlambda1_stanfit$save_object(stanfit_object_file, compress = "gzip")
}

pnbd_init_short_onehierlambda1_stanfit$summary()
# A tibble: 3,003 × 10
   variable       mean   median      sd     mad       q5      q95  rhat ess_bulk
   <chr>         <num>    <num>   <num>   <num>    <num>    <num> <num>    <num>
 1 lp__       -1.47e+4 -1.47e+4 35.0    34.7    -1.47e+4 -1.46e+4 1.01      472.
 2 lambda_mn   2.63e-1  2.62e-1  0.0111  0.0111  2.45e-1  2.82e-1 1.00     2614.
 3 lambda[1]   6.66e-1  6.62e-1  0.0761  0.0729  5.49e-1  7.97e-1 1.00     5065.
 4 lambda[2]   7.61e-1  7.47e-1  0.202   0.200   4.61e-1  1.12e+0 1.00     5072.
 5 lambda[3]   1.44e-1  8.09e-2  0.170   0.0932  4.79e-3  4.91e-1 0.999    2544.
 6 lambda[4]   2.89e-1  2.30e-1  0.224   0.180   3.96e-2  7.43e-1 1.00     3157.
 7 lambda[5]   5.89e-2  5.26e-2  0.0326  0.0296  1.72e-2  1.23e-1 1.00     3255.
 8 lambda[6]   1.46e-1  8.42e-2  0.176   0.0976  4.69e-3  5.03e-1 0.999    2197.
 9 lambda[7]   5.06e-1  4.88e-1  0.187   0.174   2.36e-1  8.33e-1 1.00     3462.
10 lambda[8]   2.08e-1  1.89e-1  0.108   0.0997  6.99e-2  4.14e-1 1.00     4389.
# ℹ 2,993 more rows
# ℹ 1 more variable: ess_tail <num>

We have some basic HMC-based validity statistics we can check.

Show code
pnbd_init_short_onehierlambda1_stanfit$cmdstan_diagnose()
Processing csv files: /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda1-1.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda1-2.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda1-3.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda1-4.csvWarning: non-fatal error reading adaptation data


Checking sampler transitions treedepth.
Treedepth satisfactory for all transitions.

Checking sampler transitions for divergences.
No divergent transitions found.

Checking E-BFMI - sampler transitions HMC potential energy.
E-BFMI satisfactory.

Effective sample size satisfactory.

Split R-hat values satisfactory all parameters.

Processing complete, no problems detected.

2.2 Visual Diagnostics of the Sample Validity

Now that we have a sample from the posterior distribution we need to create a few different visualisations of the diagnostics.

Show code
# Traceplot of the hierarchical mean plus a handful of individual-level
# lambda and mu parameters, as a quick check of chain mixing.
parameter_subset <- c(
  "lambda_mn",
  str_c("lambda[", 1:4, "]"),
  str_c("mu[", 1:4, "]")
  )

pnbd_init_short_onehierlambda1_stanfit$draws(inc_warmup = FALSE) |>
  mcmc_trace(pars = parameter_subset) +
  expand_limits(y = 0) +
  labs(
    x     = "Iteration",
    y     = "Value",
    title = "Traceplot of Sample of Lambda and Mu Values"
    ) +
  theme(axis.text.x = element_text(size = 10))

We also check \(N_{eff}\) as a quick diagnostic of the fit.

Show code
# Effective-sample-size ratios for all individual-level parameters.
neff_ratios <- neff_ratio(
  pnbd_init_short_onehierlambda1_stanfit,
  pars = c("lambda", "mu")
  )

neff_ratios |>
  as.numeric() |>
  mcmc_neff() +
    ggtitle("Plot of Parameter Effective Sample Sizes")

2.3 Assess the Model

As we intend to run the same logic to assess each of our models, we have combined all this logic into a single function run_model_assessment, to run the simulations and combine the datasets.

Show code
# Simulate from the posterior over both the fit and validation windows and
# write the simulation statistics to disk; the returned list holds the file
# paths of the three output datasets.
# NOTE(review): customer_valid_stats_tbl is not created anywhere in this
# workbook — presumably loaded by earlier setup code; confirm it is in scope.
pnbd_init_short_onehierlambda1_assess_data_lst <- run_model_assessment(
  model_stanfit    = pnbd_init_short_onehierlambda1_stanfit,
  insample_tbl     = customer_fit_stats_tbl,
  outsample_tbl    = customer_valid_stats_tbl,
  fit_label        = "pnbd_init_short_onehierlambda1",
  fit_end_dttm     = use_fit_end_date     |> as.POSIXct(),
  valid_start_dttm = use_valid_start_date |> as.POSIXct(),
  valid_end_dttm   = use_valid_end_date   |> as.POSIXct(),
  sim_seed         = 4210
  )

pnbd_init_short_onehierlambda1_assess_data_lst |> glimpse()
List of 3
 $ model_simstats_filepath      : 'glue' chr "data/pnbd_init_short_onehierlambda1_assess_model_simstats_tbl.rds"
 $ model_fit_simstats_filepath  : 'glue' chr "data/pnbd_init_short_onehierlambda1_assess_fit_simstats_tbl.rds"
 $ model_valid_simstats_filepath: 'glue' chr "data/pnbd_init_short_onehierlambda1_assess_valid_simstats_tbl.rds"

2.3.1 Check In-Sample Data Validation

We first check the model against the in-sample data.

Show code
# Compare the in-sample (fit-period) simulations against the observed data.
simdata_tbl <- read_rds(
  pnbd_init_short_onehierlambda1_assess_data_lst$model_fit_simstats_filepath
  )

insample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_fitdata_tbl,
  simdata_tbl = simdata_tbl
  )

print(insample_plots_lst$multi_plot)

Show code
insample_plots_lst$total_plot |> print()

Show code
insample_plots_lst$quant_plot |> print()

This fit looks reasonable and appears to capture most of the aspects of the data used to fit it. Given that this is a synthetic dataset, this is not surprising, but at least we appreciate that our model is valid.

2.3.2 Check Out-of-Sample Data Validation

We now repeat for the out-of-sample data.

Show code
# Compare the out-of-sample (validation-period) simulations against the
# observed validation data.
simdata_tbl <- read_rds(
  pnbd_init_short_onehierlambda1_assess_data_lst$model_valid_simstats_filepath
  )

outsample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_validdata_tbl,
  simdata_tbl = simdata_tbl
  )

print(outsample_plots_lst$multi_plot)

Show code
outsample_plots_lst$total_plot |> print()

Show code
outsample_plots_lst$quant_plot |> print()

As for our short time frame data, overall our model is working well.

3 Fit Second Hierarchical Lambda Model

In this model, we are going with a broadly similar model but we are instead using a different mean for our hierarchical prior.

3.1 Fit Stan Model

We now want to fit the model to our data using this alternative prior for lambda_mn.

Show code
# Second lambda-hierarchy fit: identical setup except for the prior mean
# hyperparameter hier_lambda_mn_p1 (0.50 instead of 0.25).
stan_modelname <- "pnbd_init_short_onehierlambda2"
stanfit_prefix <- str_c("fit_", stan_modelname)
stanfit_seed   <- stanfit_seed + 1

stanfit_object_file <- glue("data/{stanfit_prefix}_stanfit.rds")

stan_data_lst <- customer_fit_stats_tbl |>
  select(customer_id, x, t_x, T_cal) |>
  compose_data(
    hier_lambda_mn_p1 = 0.50,
    hier_lambda_mn_p2 = 1,
    lambda_cv         = 1.00,
    mu_mn             = 0.10,
    mu_cv             = 1.00
  )

# Reload a cached fit if one exists; otherwise sample and cache the result.
if (file_exists(stanfit_object_file)) {
  pnbd_init_short_onehierlambda2_stanfit <- read_rds(stanfit_object_file)
} else {
  pnbd_init_short_onehierlambda2_stanfit <- pnbd_onehierlambda_stanmodel$sample(
    data            = stan_data_lst,
    chains          = 4,
    iter_warmup     = 500,
    iter_sampling   = 500,
    seed            = stanfit_seed,
    save_warmup     = TRUE,
    output_dir      = stan_modeldir,
    output_basename = stanfit_prefix
  )

  pnbd_init_short_onehierlambda2_stanfit$save_object(stanfit_object_file, compress = "gzip")
}

pnbd_init_short_onehierlambda2_stanfit$summary()
# A tibble: 3,003 × 10
   variable       mean   median      sd     mad       q5      q95  rhat ess_bulk
   <chr>         <num>    <num>   <num>   <num>    <num>    <num> <num>    <num>
 1 lp__       -1.47e+4 -1.46e+4 34.6    33.8    -1.47e+4 -1.46e+4 1.00      757.
 2 lambda_mn   2.63e-1  2.63e-1  0.0113  0.0115  2.45e-1  2.82e-1 1.00     2595.
 3 lambda[1]   6.67e-1  6.63e-1  0.0767  0.0744  5.50e-1  8.00e-1 0.999    4817.
 4 lambda[2]   7.59e-1  7.44e-1  0.194   0.195   4.72e-1  1.09e+0 1.00     4384.
 5 lambda[3]   1.48e-1  8.48e-2  0.177   0.0987  4.05e-3  5.04e-1 1.00     2912.
 6 lambda[4]   2.91e-1  2.21e-1  0.246   0.184   3.73e-2  7.73e-1 1.00     4189.
 7 lambda[5]   5.83e-2  5.15e-2  0.0321  0.0273  1.80e-2  1.19e-1 0.999    3609.
 8 lambda[6]   1.44e-1  8.47e-2  0.170   0.0980  5.04e-3  4.81e-1 1.00     3119.
 9 lambda[7]   5.09e-1  4.84e-1  0.189   0.181   2.49e-1  8.57e-1 1.00     3730.
10 lambda[8]   2.10e-1  1.90e-1  0.108   0.101   7.08e-2  4.12e-1 1.00     3847.
# ℹ 2,993 more rows
# ℹ 1 more variable: ess_tail <num>

We have some basic HMC-based validity statistics we can check.

Show code
pnbd_init_short_onehierlambda2_stanfit$cmdstan_diagnose()
Processing csv files: /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda2-1.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda2-2.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda2-3.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehierlambda2-4.csvWarning: non-fatal error reading adaptation data


Checking sampler transitions treedepth.
Treedepth satisfactory for all transitions.

Checking sampler transitions for divergences.
No divergent transitions found.

Checking E-BFMI - sampler transitions HMC potential energy.
E-BFMI satisfactory.

Effective sample size satisfactory.

Split R-hat values satisfactory all parameters.

Processing complete, no problems detected.

3.2 Visual Diagnostics of the Sample Validity

Now that we have a sample from the posterior distribution we need to create a few different visualisations of the diagnostics.

Show code
# Traceplot of the hierarchical mean plus a handful of individual-level
# lambda and mu parameters, as a quick check of chain mixing.
parameter_subset <- c(
  "lambda_mn",
  str_c("lambda[", 1:4, "]"),
  str_c("mu[", 1:4, "]")
  )

pnbd_init_short_onehierlambda2_stanfit$draws(inc_warmup = FALSE) |>
  mcmc_trace(pars = parameter_subset) +
  expand_limits(y = 0) +
  labs(
    x     = "Iteration",
    y     = "Value",
    title = "Traceplot of Sample of Lambda and Mu Values"
    ) +
  theme(axis.text.x = element_text(size = 10))

We also check \(N_{eff}\) as a quick diagnostic of the fit.

Show code
# Effective-sample-size ratios for all individual-level parameters.
neff_ratios <- neff_ratio(
  pnbd_init_short_onehierlambda2_stanfit,
  pars = c("lambda", "mu")
  )

neff_ratios |>
  as.numeric() |>
  mcmc_neff() +
    ggtitle("Plot of Parameter Effective Sample Sizes")

3.3 Assess the Model

As we intend to run the same logic to assess each of our models, we have combined all this logic into a single function run_model_assessment, to run the simulations and combine the datasets.

Show code
# Simulate from the posterior over both the fit and validation windows and
# write the simulation statistics to disk; the returned list holds the file
# paths of the three output datasets.
pnbd_init_short_onehierlambda2_assess_data_lst <- run_model_assessment(
  model_stanfit    = pnbd_init_short_onehierlambda2_stanfit,
  insample_tbl     = customer_fit_stats_tbl,
  outsample_tbl    = customer_valid_stats_tbl,
  fit_label        = "pnbd_init_short_onehierlambda2",
  fit_end_dttm     = use_fit_end_date     |> as.POSIXct(),
  valid_start_dttm = use_valid_start_date |> as.POSIXct(),
  valid_end_dttm   = use_valid_end_date   |> as.POSIXct(),
  sim_seed         = 4210
  )

pnbd_init_short_onehierlambda2_assess_data_lst |> glimpse()
List of 3
 $ model_simstats_filepath      : 'glue' chr "data/pnbd_init_short_onehierlambda2_assess_model_simstats_tbl.rds"
 $ model_fit_simstats_filepath  : 'glue' chr "data/pnbd_init_short_onehierlambda2_assess_fit_simstats_tbl.rds"
 $ model_valid_simstats_filepath: 'glue' chr "data/pnbd_init_short_onehierlambda2_assess_valid_simstats_tbl.rds"

3.3.1 Check In-Sample Data Validation

We first check the model against the in-sample data.

Show code
# Compare the in-sample (fit-period) simulations against the observed data.
simdata_tbl <- read_rds(
  pnbd_init_short_onehierlambda2_assess_data_lst$model_fit_simstats_filepath
  )

insample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_fitdata_tbl,
  simdata_tbl = simdata_tbl
  )

print(insample_plots_lst$multi_plot)

Show code
insample_plots_lst$total_plot |> print()

Show code
insample_plots_lst$quant_plot |> print()

This fit looks reasonable and appears to capture most of the aspects of the data used to fit it. Given that this is a synthetic dataset, this is not surprising, but at least we appreciate that our model is valid.

3.3.2 Check Out-of-Sample Data Validation

We now repeat for the out-of-sample data.

Show code
# FIX: this section assesses the *second* hierarchical-lambda model, but the
# code previously read the validation simulations of model 1 (copy-paste
# error), so the plots did not reflect the model under discussion.
simdata_tbl <- pnbd_init_short_onehierlambda2_assess_data_lst |>
  use_series(model_valid_simstats_filepath) |>
  read_rds()

outsample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_validdata_tbl,
  simdata_tbl = simdata_tbl
  )

outsample_plots_lst$multi_plot |> print()

Show code
outsample_plots_lst$total_plot |> print()

Show code
outsample_plots_lst$quant_plot |> print()

As for our short time frame data, overall our model is working well.

4 Fit First Hierarchical Mu Model

We now construct the same hierarchical model but based around mu_mn.

4.1 Compile and Fit Stan Model

We compile this model using CmdStanR.

Show code
# Compile the single-hierarchical-mu P/NBD model with CmdStanR,
# with pedantic compile-time checks of the Stan code enabled.
pnbd_onehiermu_stanmodel <- cmdstan_model(
  "stan_code/pnbd_onehier_mu.stan",
  include_paths = stan_codedir,
  pedantic      = TRUE,
  dir           = stan_modeldir
)

We then use this compiled model with our data to produce a fit of the data.

Show code
# First mu-hierarchy fit: the hierarchical prior now sits on the population
# mean of mu, while lambda_mn becomes a fixed hyperparameter.
stan_modelname <- "pnbd_init_short_onehiermu1"
stanfit_prefix <- str_c("fit_", stan_modelname)
stanfit_seed   <- stanfit_seed + 1

stanfit_object_file <- glue("data/{stanfit_prefix}_stanfit.rds")

stan_data_lst <- customer_fit_stats_tbl |>
  select(customer_id, x, t_x, T_cal) |>
  compose_data(
    hier_mu_mn_p1 = 0.50,
    hier_mu_mn_p2 = 1.00,
    lambda_mn     = 0.25,
    lambda_cv     = 1.00,
    mu_cv         = 1.00
  )

# Reload a cached fit if one exists; otherwise sample and cache the result.
if (file_exists(stanfit_object_file)) {
  pnbd_init_short_onehiermu1_stanfit <- read_rds(stanfit_object_file)
} else {
  pnbd_init_short_onehiermu1_stanfit <- pnbd_onehiermu_stanmodel$sample(
    data            = stan_data_lst,
    chains          = 4,
    iter_warmup     = 500,
    iter_sampling   = 500,
    seed            = stanfit_seed,
    save_warmup     = TRUE,
    output_dir      = stan_modeldir,
    output_basename = stanfit_prefix
  )

  pnbd_init_short_onehiermu1_stanfit$save_object(stanfit_object_file, compress = "gzip")
}

pnbd_init_short_onehiermu1_stanfit$summary()
# A tibble: 3,003 × 10
   variable       mean   median      sd     mad       q5      q95  rhat ess_bulk
   <chr>         <num>    <num>   <num>   <num>    <num>    <num> <num>    <num>
 1 lp__       -1.37e+4 -1.37e+4 3.59e+1 3.57e+1 -1.38e+4 -1.37e+4 1.01      531.
 2 mu_mn       1.06e-1  1.05e-1 7.26e-3 7.20e-3  9.42e-2  1.18e-1 1.00      891.
 3 lambda[1]   6.65e-1  6.61e-1 7.55e-2 7.79e-2  5.49e-1  7.96e-1 1.00     4584.
 4 lambda[2]   7.51e-1  7.33e-1 2.00e-1 1.91e-1  4.62e-1  1.12e+0 1.00     4930.
 5 lambda[3]   1.46e-1  8.25e-2 1.85e-1 9.49e-2  4.90e-3  4.88e-1 1.00     2876.
 6 lambda[4]   2.90e-1  2.32e-1 2.35e-1 1.96e-1  3.93e-2  7.56e-1 1.00     3733.
 7 lambda[5]   5.81e-2  5.27e-2 3.16e-2 2.87e-2  1.76e-2  1.20e-1 1.00     4621.
 8 lambda[6]   1.47e-1  8.49e-2 1.85e-1 9.88e-2  4.68e-3  5.07e-1 0.999    3456.
 9 lambda[7]   5.02e-1  4.74e-1 1.86e-1 1.79e-1  2.45e-1  8.58e-1 1.00     5024.
10 lambda[8]   2.07e-1  1.87e-1 1.13e-1 1.02e-1  6.11e-2  4.11e-1 1.00     3959.
# ℹ 2,993 more rows
# ℹ 1 more variable: ess_tail <num>

We have some basic HMC-based validity statistics we can check.

Show code
pnbd_init_short_onehiermu1_stanfit$cmdstan_diagnose()
Processing csv files: /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu1-1.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu1-2.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu1-3.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu1-4.csvWarning: non-fatal error reading adaptation data


Checking sampler transitions treedepth.
Treedepth satisfactory for all transitions.

Checking sampler transitions for divergences.
No divergent transitions found.

Checking E-BFMI - sampler transitions HMC potential energy.
E-BFMI satisfactory.

Effective sample size satisfactory.

Split R-hat values satisfactory all parameters.

Processing complete, no problems detected.

4.2 Visual Diagnostics of the Sample Validity

Now that we have a sample from the posterior distribution we need to create a few different visualisations of the diagnostics.

Show code
# Traceplot of the hierarchical mean plus a handful of individual-level
# lambda and mu parameters, as a quick check of chain mixing.
parameter_subset <- c(
  "mu_mn",
  str_c("lambda[", 1:4, "]"),
  str_c("mu[", 1:4, "]")
  )

pnbd_init_short_onehiermu1_stanfit$draws(inc_warmup = FALSE) |>
  mcmc_trace(pars = parameter_subset) +
  expand_limits(y = 0) +
  labs(
    x     = "Iteration",
    y     = "Value",
    title = "Traceplot of Sample of Lambda and Mu Values"
    ) +
  theme(axis.text.x = element_text(size = 10))

We also check \(N_{eff}\) as a quick diagnostic of the fit.

Show code
# Effective-sample-size ratios for all individual-level parameters.
neff_ratios <- neff_ratio(
  pnbd_init_short_onehiermu1_stanfit,
  pars = c("lambda", "mu")
  )

neff_ratios |>
  as.numeric() |>
  mcmc_neff() +
    ggtitle("Plot of Parameter Effective Sample Sizes")

4.3 Assess the Model

As we intend to run the same logic to assess each of our models, we have combined all this logic into a single function run_model_assessment, to run the simulations and combine the datasets.

Show code
# Simulate from the posterior over both the fit and validation windows and
# write the simulation statistics to disk; the returned list holds the file
# paths of the three output datasets.
pnbd_init_short_onehiermu1_assess_data_lst <- run_model_assessment(
  model_stanfit    = pnbd_init_short_onehiermu1_stanfit,
  insample_tbl     = customer_fit_stats_tbl,
  outsample_tbl    = customer_valid_stats_tbl,
  fit_label        = "pnbd_init_short_onehiermu1",
  fit_end_dttm     = use_fit_end_date     |> as.POSIXct(),
  valid_start_dttm = use_valid_start_date |> as.POSIXct(),
  valid_end_dttm   = use_valid_end_date   |> as.POSIXct(),
  sim_seed         = 4210
  )

pnbd_init_short_onehiermu1_assess_data_lst |> glimpse()
List of 3
 $ model_simstats_filepath      : 'glue' chr "data/pnbd_init_short_onehiermu1_assess_model_simstats_tbl.rds"
 $ model_fit_simstats_filepath  : 'glue' chr "data/pnbd_init_short_onehiermu1_assess_fit_simstats_tbl.rds"
 $ model_valid_simstats_filepath: 'glue' chr "data/pnbd_init_short_onehiermu1_assess_valid_simstats_tbl.rds"

4.3.1 Check In-Sample Data Validation

We first check the model against the in-sample data.

Show code
# Compare the in-sample (fit-period) simulations against the observed data.
simdata_tbl <- read_rds(
  pnbd_init_short_onehiermu1_assess_data_lst$model_fit_simstats_filepath
  )

insample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_fitdata_tbl,
  simdata_tbl = simdata_tbl
  )

print(insample_plots_lst$multi_plot)

Show code
insample_plots_lst$total_plot |> print()

Show code
insample_plots_lst$quant_plot |> print()

This fit looks reasonable and appears to capture most of the aspects of the data used to fit it. Given that this is a synthetic dataset, this is not surprising, but at least we appreciate that our model is valid.

4.3.2 Check Out-of-Sample Data Validation

We now repeat for the out-of-sample data.

Show code
# FIX: this section assesses the first hierarchical-mu model, but the code
# previously read the validation simulations of the onehierlambda1 model
# (copy-paste error), so the plots did not reflect the model under discussion.
simdata_tbl <- pnbd_init_short_onehiermu1_assess_data_lst |>
  use_series(model_valid_simstats_filepath) |>
  read_rds()

outsample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_validdata_tbl,
  simdata_tbl = simdata_tbl
  )

outsample_plots_lst$multi_plot |> print()

Show code
outsample_plots_lst$total_plot |> print()

Show code
outsample_plots_lst$quant_plot |> print()

As for our short time frame data, overall our model is working well.

5 Fit Second Hierarchical Mu Model

In this model, we are going with a broadly similar model but we are instead using a different mean for our hierarchical prior.

5.1 Fit Stan Model

We now want to fit the model to our data using this alternative prior for mu_mn.

Show code
# Second mu-hierarchy fit: identical setup except for the prior mean
# hyperparameter hier_mu_mn_p1 (0.25 instead of 0.50).
stan_modelname <- "pnbd_init_short_onehiermu2"
stanfit_prefix <- str_c("fit_", stan_modelname)
stanfit_seed   <- stanfit_seed + 1

stanfit_object_file <- glue("data/{stanfit_prefix}_stanfit.rds")

stan_data_lst <- customer_fit_stats_tbl |>
  select(customer_id, x, t_x, T_cal) |>
  compose_data(
    hier_mu_mn_p1 = 0.25,
    hier_mu_mn_p2 = 1.00,
    lambda_mn     = 0.25,
    lambda_cv     = 1.00,
    mu_cv         = 1.00
  )

# Reload a cached fit if one exists; otherwise sample and cache the result.
if (file_exists(stanfit_object_file)) {
  pnbd_init_short_onehiermu2_stanfit <- read_rds(stanfit_object_file)
} else {
  pnbd_init_short_onehiermu2_stanfit <- pnbd_onehiermu_stanmodel$sample(
    data            = stan_data_lst,
    chains          = 4,
    iter_warmup     = 500,
    iter_sampling   = 500,
    seed            = stanfit_seed,
    save_warmup     = TRUE,
    output_dir      = stan_modeldir,
    output_basename = stanfit_prefix
  )

  pnbd_init_short_onehiermu2_stanfit$save_object(stanfit_object_file, compress = "gzip")
}

pnbd_init_short_onehiermu2_stanfit$summary()
# A tibble: 3,003 × 10
   variable       mean   median      sd     mad       q5      q95  rhat ess_bulk
   <chr>         <num>    <num>   <num>   <num>    <num>    <num> <num>    <num>
 1 lp__       -1.37e+4 -1.37e+4 3.59e+1 3.46e+1 -1.38e+4 -1.37e+4  1.00     498.
 2 mu_mn       1.05e-1  1.05e-1 7.64e-3 7.50e-3  9.33e-2  1.19e-1  1.00     914.
 3 lambda[1]   6.67e-1  6.63e-1 7.94e-2 7.69e-2  5.43e-1  8.03e-1  1.01    5316.
 4 lambda[2]   7.52e-1  7.38e-1 1.98e-1 2.02e-1  4.53e-1  1.11e+0  1.00    5029.
 5 lambda[3]   1.45e-1  8.19e-2 1.77e-1 9.28e-2  4.84e-3  4.97e-1  1.00    2854.
 6 lambda[4]   2.83e-1  2.15e-1 2.39e-1 1.81e-1  3.63e-2  7.29e-1  1.00    3342.
 7 lambda[5]   5.87e-2  5.34e-2 3.12e-2 2.83e-2  1.88e-2  1.19e-1  1.00    3307.
 8 lambda[6]   1.42e-1  8.65e-2 1.73e-1 9.67e-2  4.78e-3  4.68e-1  1.00    3498.
 9 lambda[7]   5.04e-1  4.78e-1 1.90e-1 1.66e-1  2.39e-1  8.71e-1  1.00    5589.
10 lambda[8]   2.07e-1  1.85e-1 1.13e-1 1.04e-1  6.61e-2  4.20e-1  1.00    4643.
# ℹ 2,993 more rows
# ℹ 1 more variable: ess_tail <num>

We have some basic HMC-based validity statistics we can check.

Show code
pnbd_init_short_onehiermu2_stanfit$cmdstan_diagnose()
Processing csv files: /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu2-1.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu2-2.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu2-3.csvWarning: non-fatal error reading adaptation data
, /home/rstudio/btydwork/stan_models/fit_pnbd_init_short_onehiermu2-4.csvWarning: non-fatal error reading adaptation data


Checking sampler transitions treedepth.
Treedepth satisfactory for all transitions.

Checking sampler transitions for divergences.
No divergent transitions found.

Checking E-BFMI - sampler transitions HMC potential energy.
E-BFMI satisfactory.

Effective sample size satisfactory.

Split R-hat values satisfactory all parameters.

Processing complete, no problems detected.

5.2 Visual Diagnostics of the Sample Validity

Now that we have a sample from the posterior distribution we need to create a few different visualisations of the diagnostics.

Show code
# Traceplot of the hierarchical mean plus a handful of individual-level
# lambda and mu parameters, as a quick check of chain mixing.
parameter_subset <- c(
  "mu_mn",
  str_c("lambda[", 1:4, "]"),
  str_c("mu[", 1:4, "]")
  )

pnbd_init_short_onehiermu2_stanfit$draws(inc_warmup = FALSE) |>
  mcmc_trace(pars = parameter_subset) +
  expand_limits(y = 0) +
  labs(
    x     = "Iteration",
    y     = "Value",
    title = "Traceplot of Sample of Lambda and Mu Values"
    ) +
  theme(axis.text.x = element_text(size = 10))

We also check \(N_{eff}\) as a quick diagnostic of the fit.

Show code
# Effective-sample-size ratios for all individual-level parameters.
neff_ratios <- neff_ratio(
  pnbd_init_short_onehiermu2_stanfit,
  pars = c("lambda", "mu")
  )

neff_ratios |>
  as.numeric() |>
  mcmc_neff() +
    ggtitle("Plot of Parameter Effective Sample Sizes")

5.3 Assess the Model

As we intend to run the same logic to assess each of our models, we have combined all this logic into a single function run_model_assessment, to run the simulations and combine the datasets.

Show code
# Simulate from the posterior over both the fit and validation windows and
# write the simulation statistics to disk; the returned list holds the file
# paths of the three output datasets.
pnbd_init_short_onehiermu2_assess_data_lst <- run_model_assessment(
  model_stanfit    = pnbd_init_short_onehiermu2_stanfit,
  insample_tbl     = customer_fit_stats_tbl,
  outsample_tbl    = customer_valid_stats_tbl,
  fit_label        = "pnbd_init_short_onehiermu2",
  fit_end_dttm     = use_fit_end_date     |> as.POSIXct(),
  valid_start_dttm = use_valid_start_date |> as.POSIXct(),
  valid_end_dttm   = use_valid_end_date   |> as.POSIXct(),
  sim_seed         = 4210
  )

pnbd_init_short_onehiermu2_assess_data_lst |> glimpse()
List of 3
 $ model_simstats_filepath      : 'glue' chr "data/pnbd_init_short_onehiermu2_assess_model_simstats_tbl.rds"
 $ model_fit_simstats_filepath  : 'glue' chr "data/pnbd_init_short_onehiermu2_assess_fit_simstats_tbl.rds"
 $ model_valid_simstats_filepath: 'glue' chr "data/pnbd_init_short_onehiermu2_assess_valid_simstats_tbl.rds"

5.3.1 Check In-Sample Data Validation

We first check the model against the in-sample data.

Show code
# Compare the in-sample (fit-period) simulations against the observed data.
simdata_tbl <- read_rds(
  pnbd_init_short_onehiermu2_assess_data_lst$model_fit_simstats_filepath
  )

insample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_fitdata_tbl,
  simdata_tbl = simdata_tbl
  )

print(insample_plots_lst$multi_plot)

Show code
insample_plots_lst$total_plot |> print()

Show code
insample_plots_lst$quant_plot |> print()

This fit looks reasonable and appears to capture most of the aspects of the data used to fit it. Given that this is a synthetic dataset, this is not surprising, but at least we appreciate that our model is valid.

5.3.2 Check Out-of-Sample Data Validation

We now repeat for the out-of-sample data.

Show code
# FIX: this section assesses the *second* hierarchical-mu model, but the code
# previously read the validation simulations of onehiermu1 (copy-paste error),
# so the plots did not reflect the model under discussion.
simdata_tbl <- pnbd_init_short_onehiermu2_assess_data_lst |>
  use_series(model_valid_simstats_filepath) |>
  read_rds()

outsample_plots_lst <- create_model_assessment_plots(
  obsdata_tbl = obs_validdata_tbl,
  simdata_tbl = simdata_tbl
  )

outsample_plots_lst$multi_plot |> print()

Show code
outsample_plots_lst$total_plot |> print()

Show code
outsample_plots_lst$quant_plot |> print()

As for our short time frame data, overall our model is working well.

6 Compare Model Outputs

We have looked at each of the models individually, but it is also worth looking at each of the models as a group.

Show code
# Summarise a simulation-output RDS file per posterior draw.
#
# file_rds: path to an RDS file of per-customer simulation results with (at
#   least) columns draw_id and sim_tnx_count.
# Returns a tibble with one row per draw_id and two columns:
#   multicust_count - customers with at least one simulated transaction,
#   simtnx_count    - total simulated transactions across all customers.
calculate_simulation_statistics <- function(file_rds) {
  simdata_tbl <- read_rds(file_rds)

  simdata_tbl |>
    filter(sim_tnx_count > 0) |>
    count(draw_id, name = "multicust_count") |>
    inner_join(
      simdata_tbl |> count(draw_id, wt = sim_tnx_count, name = "simtnx_count"),
      by = "draw_id"
      )
}
Show code
# Observed equivalents of the simulation statistics: repeat-customer counts
# and total transaction counts for the fit and validation periods.
count_repeat_customers <- function(data_tbl) {
  data_tbl |> filter(tnx_count > 0) |> nrow()
}

obs_fit_customer_count   <- count_repeat_customers(obs_fitdata_tbl)
obs_valid_customer_count <- count_repeat_customers(obs_validdata_tbl)

obs_fit_total_count   <- obs_fitdata_tbl   |> pull(tnx_count) |> sum()
obs_valid_total_count <- obs_validdata_tbl |> pull(tnx_count) |> sum()

# Long-format table keyed the same way as the simulation summaries.
obs_stats_tbl <- tribble(
  ~assess_type, ~name,             ~obs_value,
  "fit",        "multicust_count", obs_fit_customer_count,
  "fit",        "simtnx_count",    obs_fit_total_count,
  "valid",      "multicust_count", obs_valid_customer_count,
  "valid",      "simtnx_count",    obs_valid_total_count
  )


# Collect the per-model assessment files from disk and compute the simulation
# statistics for each one. The filename encodes both the model label and
# whether the file covers the fitting or the validation period.
model_assess_tbl <- dir_ls("data", regexp = "pnbd_init_short_(one|fixed).*_assess") |>
  enframe(name = NULL, value = "file_path") |>
  # Drop the fitted-model objects; we only want the simstats files here.
  filter(str_detect(file_path, "_assess_model_", negate = TRUE)) |>
  mutate(
    # Extract the model label, e.g. "fixed1" or "onehiermu1", from the path.
    model_label = str_replace(file_path, "data/pnbd_init_short_(.*?)_assess_.*", "\\1"),
    # Files not marked "_assess_fit_" are assumed to be validation-period
    # files — TODO confirm no other assessment types can appear here.
    assess_type = if_else(str_detect(file_path, "_assess_fit_"), "fit", "valid"),
    
    # Per-draw simulation statistics for each file (list column of tibbles).
    sim_data = map(
      file_path, calculate_simulation_statistics,
      
      .progress = "calculate_simulation_statistics"
      )
    )

model_assess_tbl |> glimpse()
Rows: 14
Columns: 4
$ file_path   <fs::path> "data/pnbd_init_short_fixed1_assess_fit_simstats_tbl.…
$ model_label <chr> "fixed1", "fixed1", "fixed2", "fixed2", "fixed3", "fixed3"…
$ assess_type <chr> "fit", "valid", "fit", "valid", "fit", "valid", "fit", "va…
$ sim_data    <list> [<tbl_df[2000 x 3]>], [<tbl_df[2000 x 3]>], [<tbl_df[2000…
Show code
# Summarise the per-draw simulation statistics for each model and assessment
# period: posterior mean plus the 10/25/50/75/90 percentiles of each count.
model_assess_summstat_tbl <- model_assess_tbl |>
  select(model_label, assess_type, sim_data) |>
  unnest(sim_data) |>
  # Stack multicust_count and simtnx_count into a single (name, value) pair
  # so both statistics can be summarised in one pass.
  pivot_longer(
    cols = !c(model_label, assess_type, draw_id)
    ) |>
  group_by(model_label, assess_type, name) |>
  summarise(
    .groups = "drop",
    
    mean_val = mean(value),
    p10 = quantile(value, 0.10),
    p25 = quantile(value, 0.25),
    p50 = quantile(value, 0.50),
    p75 = quantile(value, 0.75),
    p90 = quantile(value, 0.90)
    )

model_assess_summstat_tbl |> glimpse()
Rows: 28
Columns: 9
$ model_label <chr> "fixed1", "fixed1", "fixed1", "fixed1", "fixed2", "fixed2"…
$ assess_type <chr> "fit", "fit", "valid", "valid", "fit", "fit", "valid", "va…
$ name        <chr> "multicust_count", "simtnx_count", "multicust_count", "sim…
$ mean_val    <dbl> 620.9155, 4283.5425, 182.8110, 1862.9220, 680.4490, 3177.8…
$ p10         <dbl> 602.0, 3993.9, 172.0, 1674.0, 661.0, 2981.0, 129.9, 828.0,…
$ p25         <dbl> 611.00, 4118.75, 177.00, 1768.00, 671.00, 3067.00, 134.00,…
$ p50         <dbl> 621.0, 4279.5, 183.0, 1857.0, 681.0, 3175.0, 139.0, 939.0,…
$ p75         <dbl> 631.00, 4437.00, 189.00, 1962.25, 690.00, 3285.25, 144.00,…
$ p90         <dbl> 639.0, 4580.0, 194.0, 2051.0, 698.1, 3384.0, 149.0, 1072.0…
Show code
#| echo: true

# Compare each model's simulated statistics against the observed values:
# thin bars span the 10th-90th percentiles, thick bars the interquartile
# range, and the red line marks the observed statistic for that panel.
ggplot(model_assess_summstat_tbl) +
  geom_errorbar(
    aes(x = model_label, ymin = p10, ymax = p90), width = 0
    ) +
  geom_errorbar(
    aes(x = model_label, ymin = p25, ymax = p75), width = 0, linewidth = 3
    ) +
  geom_hline(
    aes(yintercept = obs_value),
    data = obs_stats_tbl, colour = "red"
    ) +
  scale_y_continuous(labels = label_comma()) +
  expand_limits(y = 0) +
  facet_wrap(
    # Spell out 'scales' in full rather than relying on partial matching.
    vars(assess_type, name), scales = "free_y"
    ) +
  labs(
    x = "Model",
    y = "Count",
    title = "Comparison Plot for the Different Models"
    ) +
  theme(
    axis.text.x = element_text(angle = 20, vjust = 0.5, size = 8)
    )

6.1 Write Assessment Data to Disk

We now want to save the assessment data to disk.

Show code
# Persist the assessment table (including the per-draw simulation statistics)
# for use in later comparison workbooks.
write_rds(model_assess_tbl, "data/assess_data_pnbd_short_onehier_tbl.rds")

7 R Environment

Show code
# Temporarily widen the console so session_info() prints without wrapping.
options(width = 120L)
sessioninfo::session_info()
─ Session info ───────────────────────────────────────────────────────────────────────────────────────────────────────
 setting  value
 version  R version 4.2.3 (2023-03-15)
 os       Ubuntu 22.04.2 LTS
 system   x86_64, linux-gnu
 ui       X11
 language (EN)
 collate  en_US.UTF-8
 ctype    en_US.UTF-8
 tz       Europe/Dublin
 date     2023-06-09
 pandoc   2.19.2 @ /usr/local/bin/ (via rmarkdown)

─ Packages ───────────────────────────────────────────────────────────────────────────────────────────────────────────
 package        * version   date (UTC) lib source
 abind            1.4-5     2016-07-21 [1] RSPM (R 4.2.0)
 arrayhelpers     1.1-0     2020-02-04 [1] RSPM (R 4.2.0)
 backports        1.4.1     2021-12-13 [1] RSPM (R 4.2.0)
 base64enc        0.1-3     2015-07-28 [1] RSPM (R 4.2.0)
 bayesplot      * 1.10.0    2022-11-16 [1] RSPM (R 4.2.0)
 boot             1.3-28.1  2022-11-22 [2] CRAN (R 4.2.3)
 bridgesampling   1.1-2     2021-04-16 [1] RSPM (R 4.2.0)
 brms           * 2.19.0    2023-03-14 [1] RSPM (R 4.2.0)
 Brobdingnag      1.2-9     2022-10-19 [1] RSPM (R 4.2.0)
 cachem           1.0.7     2023-02-24 [1] RSPM (R 4.2.0)
 callr            3.7.3     2022-11-02 [1] RSPM (R 4.2.0)
 checkmate        2.1.0     2022-04-21 [1] RSPM (R 4.2.0)
 cli              3.6.1     2023-03-23 [1] RSPM (R 4.2.0)
 cmdstanr       * 0.5.3     2023-05-25 [1] Github (stan-dev/cmdstanr@22b391e)
 coda             0.19-4    2020-09-30 [1] RSPM (R 4.2.0)
 codetools        0.2-19    2023-02-01 [2] CRAN (R 4.2.3)
 colorspace       2.1-0     2023-01-23 [1] RSPM (R 4.2.0)
 colourpicker     1.2.0     2022-10-28 [1] RSPM (R 4.2.0)
 conflicted     * 1.2.0     2023-02-01 [1] RSPM (R 4.2.0)
 cowplot        * 1.1.1     2020-12-30 [1] RSPM (R 4.2.0)
 crayon           1.5.2     2022-09-29 [1] RSPM (R 4.2.0)
 crosstalk        1.2.0     2021-11-04 [1] RSPM (R 4.2.0)
 digest           0.6.31    2022-12-11 [1] RSPM (R 4.2.0)
 directlabels   * 2021.1.13 2021-01-16 [1] RSPM (R 4.2.0)
 distributional   0.3.2     2023-03-22 [1] RSPM (R 4.2.0)
 dplyr          * 1.1.1     2023-03-22 [1] RSPM (R 4.2.0)
 DT               0.27      2023-01-17 [1] RSPM (R 4.2.0)
 dygraphs         1.1.1.6   2018-07-11 [1] RSPM (R 4.2.0)
 ellipsis         0.3.2     2021-04-29 [1] RSPM (R 4.2.0)
 evaluate         0.20      2023-01-17 [1] RSPM (R 4.2.0)
 fansi            1.0.4     2023-01-22 [1] RSPM (R 4.2.0)
 farver           2.1.1     2022-07-06 [1] RSPM (R 4.2.0)
 fastmap          1.1.1     2023-02-24 [1] RSPM (R 4.2.0)
 forcats        * 1.0.0     2023-01-29 [1] RSPM (R 4.2.0)
 fs             * 1.6.1     2023-02-06 [1] RSPM (R 4.2.0)
 furrr          * 0.3.1     2022-08-15 [1] RSPM (R 4.2.0)
 future         * 1.32.0    2023-03-07 [1] RSPM (R 4.2.0)
 gamm4            0.2-6     2020-04-03 [1] RSPM (R 4.2.0)
 generics         0.1.3     2022-07-05 [1] RSPM (R 4.2.0)
 ggdist           3.2.1     2023-01-18 [1] RSPM (R 4.2.0)
 ggplot2        * 3.4.2     2023-04-03 [1] RSPM (R 4.2.0)
 globals          0.16.2    2022-11-21 [1] RSPM (R 4.2.0)
 glue           * 1.6.2     2022-02-24 [1] RSPM (R 4.2.0)
 gridExtra        2.3       2017-09-09 [1] RSPM (R 4.2.0)
 gtable           0.3.3     2023-03-21 [1] RSPM (R 4.2.0)
 gtools           3.9.4     2022-11-27 [1] RSPM (R 4.2.0)
 hms              1.1.3     2023-03-21 [1] RSPM (R 4.2.0)
 htmltools        0.5.5     2023-03-23 [1] RSPM (R 4.2.0)
 htmlwidgets      1.6.2     2023-03-17 [1] RSPM (R 4.2.0)
 httpuv           1.6.9     2023-02-14 [1] RSPM (R 4.2.0)
 igraph           1.4.2     2023-04-07 [1] RSPM (R 4.2.0)
 inline           0.3.19    2021-05-31 [1] RSPM (R 4.2.0)
 jsonlite         1.8.4     2022-12-06 [1] RSPM (R 4.2.0)
 knitr            1.42      2023-01-25 [1] RSPM (R 4.2.0)
 labeling         0.4.2     2020-10-20 [1] RSPM (R 4.2.0)
 later            1.3.0     2021-08-18 [1] RSPM (R 4.2.0)
 lattice          0.20-45   2021-09-22 [2] CRAN (R 4.2.3)
 lifecycle        1.0.3     2022-10-07 [1] RSPM (R 4.2.0)
 listenv          0.9.0     2022-12-16 [1] RSPM (R 4.2.0)
 lme4             1.1-32    2023-03-14 [1] RSPM (R 4.2.0)
 loo              2.6.0     2023-03-31 [1] RSPM (R 4.2.0)
 lubridate      * 1.9.2     2023-02-10 [1] RSPM (R 4.2.0)
 magrittr       * 2.0.3     2022-03-30 [1] RSPM (R 4.2.0)
 markdown         1.6       2023-04-07 [1] RSPM (R 4.2.0)
 MASS             7.3-58.2  2023-01-23 [2] CRAN (R 4.2.3)
 Matrix           1.5-3     2022-11-11 [2] CRAN (R 4.2.3)
 matrixStats      0.63.0    2022-11-18 [1] RSPM (R 4.2.0)
 memoise          2.0.1     2021-11-26 [1] RSPM (R 4.2.0)
 mgcv             1.8-42    2023-03-02 [2] CRAN (R 4.2.3)
 mime             0.12      2021-09-28 [1] RSPM (R 4.2.0)
 miniUI           0.1.1.1   2018-05-18 [1] RSPM (R 4.2.0)
 minqa            1.2.5     2022-10-19 [1] RSPM (R 4.2.0)
 munsell          0.5.0     2018-06-12 [1] RSPM (R 4.2.0)
 mvtnorm          1.1-3     2021-10-08 [1] RSPM (R 4.2.0)
 nlme             3.1-162   2023-01-31 [2] CRAN (R 4.2.3)
 nloptr           2.0.3     2022-05-26 [1] RSPM (R 4.2.0)
 parallelly       1.35.0    2023-03-23 [1] RSPM (R 4.2.0)
 pillar           1.9.0     2023-03-22 [1] RSPM (R 4.2.0)
 pkgbuild         1.4.0     2022-11-27 [1] RSPM (R 4.2.0)
 pkgconfig        2.0.3     2019-09-22 [1] RSPM (R 4.2.0)
 plyr             1.8.8     2022-11-11 [1] RSPM (R 4.2.0)
 posterior      * 1.4.1     2023-03-14 [1] RSPM (R 4.2.0)
 prettyunits      1.1.1     2020-01-24 [1] RSPM (R 4.2.0)
 processx         3.8.1     2023-04-18 [1] RSPM (R 4.2.0)
 projpred         2.5.0     2023-04-05 [1] RSPM (R 4.2.0)
 promises         1.2.0.1   2021-02-11 [1] RSPM (R 4.2.0)
 ps               1.7.5     2023-04-18 [1] RSPM (R 4.2.0)
 purrr          * 1.0.1     2023-01-10 [1] RSPM (R 4.2.0)
 quadprog         1.5-8     2019-11-20 [1] RSPM (R 4.2.0)
 R6               2.5.1     2021-08-19 [1] RSPM (R 4.2.0)
 Rcpp           * 1.0.10    2023-01-22 [1] RSPM (R 4.2.0)
 RcppParallel     5.1.7     2023-02-27 [1] RSPM (R 4.2.0)
 readr          * 2.1.4     2023-02-10 [1] RSPM (R 4.2.0)
 reshape2         1.4.4     2020-04-09 [1] RSPM (R 4.2.0)
 rlang          * 1.1.0     2023-03-14 [1] RSPM (R 4.2.0)
 rmarkdown        2.21      2023-03-26 [1] RSPM (R 4.2.0)
 rstan            2.21.8    2023-01-17 [1] RSPM (R 4.2.0)
 rstantools       2.3.1     2023-03-30 [1] RSPM (R 4.2.0)
 rsyslog        * 1.0.2     2021-06-04 [1] RSPM (R 4.2.0)
 scales         * 1.2.1     2022-08-20 [1] RSPM (R 4.2.0)
 sessioninfo      1.2.2     2021-12-06 [1] RSPM (R 4.2.0)
 shiny            1.7.4     2022-12-15 [1] RSPM (R 4.2.0)
 shinyjs          2.1.0     2021-12-23 [1] RSPM (R 4.2.0)
 shinystan        2.6.0     2022-03-03 [1] RSPM (R 4.2.0)
 shinythemes      1.2.0     2021-01-25 [1] RSPM (R 4.2.0)
 StanHeaders      2.21.0-7  2020-12-17 [1] RSPM (R 4.2.0)
 stringi          1.7.12    2023-01-11 [1] RSPM (R 4.2.0)
 stringr        * 1.5.0     2022-12-02 [1] RSPM (R 4.2.0)
 svUnit           1.0.6     2021-04-19 [1] RSPM (R 4.2.0)
 tensorA          0.36.2    2020-11-19 [1] RSPM (R 4.2.0)
 threejs          0.3.3     2020-01-21 [1] RSPM (R 4.2.0)
 tibble         * 3.2.1     2023-03-20 [1] RSPM (R 4.2.0)
 tidybayes      * 3.0.4     2023-03-14 [1] RSPM (R 4.2.0)
 tidyr          * 1.3.0     2023-01-24 [1] RSPM (R 4.2.0)
 tidyselect       1.2.0     2022-10-10 [1] RSPM (R 4.2.0)
 tidyverse      * 2.0.0     2023-02-22 [1] RSPM (R 4.2.0)
 timechange       0.2.0     2023-01-11 [1] RSPM (R 4.2.0)
 tzdb             0.3.0     2022-03-28 [1] RSPM (R 4.2.0)
 utf8             1.2.3     2023-01-31 [1] RSPM (R 4.2.0)
 vctrs            0.6.2     2023-04-19 [1] RSPM (R 4.2.0)
 withr            2.5.0     2022-03-03 [1] RSPM (R 4.2.0)
 xfun             0.38      2023-03-24 [1] RSPM (R 4.2.0)
 xtable           1.8-4     2019-04-21 [1] RSPM (R 4.2.0)
 xts              0.13.1    2023-04-16 [1] RSPM (R 4.2.0)
 yaml             2.3.7     2023-01-23 [1] RSPM (R 4.2.0)
 zoo              1.8-12    2023-04-13 [1] RSPM (R 4.2.0)

 [1] /usr/local/lib/R/site-library
 [2] /usr/local/lib/R/library

──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Show code
# Restore the default console width changed before session_info().
options(width = 80L)